evtchn_unmask(pirq_to_evtchn(d, pirq));
spin_unlock(&d->event_lock);
}
- return pirq_guest_eoi(d, pirq);
+ return pirq_guest_eoi(pirq);
}
long do_pirq_guest_eoi(int pirq)
return ACKTYPE_NONE;
}
-int pirq_guest_eoi(struct domain *d, struct pirq *pirq)
+int pirq_guest_eoi(struct pirq *pirq)
{
irq_desc_t *desc;
irq_guest_action_t *action;
pirq = pirqs[i]->pirq;
if ( pirqs[i]->masked &&
!test_bit(pirqs[i]->evtchn, &s->evtchn_mask[0]) )
- pirq_guest_eoi(d, pirqs[i]);
+ pirq_guest_eoi(pirqs[i]);
}
} while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
flush_ready_eoi();
}
-void pirq_guest_eoi(struct domain *d, struct pirq *pirq)
+void pirq_guest_eoi(struct pirq *pirq)
{
struct irq_desc *desc;
ASSERT(local_irq_is_enabled());
desc = pirq_spin_lock_irq_desc(pirq, NULL);
if ( desc )
- desc_guest_eoi(d, desc, pirq);
+ desc_guest_eoi(desc, pirq);
}
-void desc_guest_eoi(struct domain *d, struct irq_desc *desc, struct pirq *pirq)
+void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
{
irq_guest_action_t *action;
cpumask_t cpu_eoi_map;
pirq = pirqs[i]->pirq;
if ( pirqs[i]->masked &&
!test_bit(pirqs[i]->evtchn, &shared_info(d, evtchn_mask)) )
- pirq_guest_eoi(d, pirqs[i]);
+ pirq_guest_eoi(pirqs[i]);
}
} while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
evtchn_unmask(pirq->evtchn);
if ( !is_hvm_domain(v->domain) ||
pirq->arch.hvm.emuirq == IRQ_PT )
- pirq_guest_eoi(v->domain, pirq);
+ pirq_guest_eoi(pirq);
spin_unlock(&v->domain->event_lock);
ret = 0;
break;
{
pirq_dpci->masked = 0;
pirq_dpci->pending = 0;
- pirq_guest_eoi(d, dpci_pirq(pirq_dpci));
+ pirq_guest_eoi(dpci_pirq(pirq_dpci));
}
return 0;
if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec ||
pirq_dpci->gmsi.gflags != pt_irq_bind->u.msi.gflags) {
/* Directly clear pending EOIs before enabling new MSI info. */
- pirq_guest_eoi(d, info);
+ pirq_guest_eoi(info);
pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
#ifdef SUPPORT_MSI_REMAPPING
/* called with d->event_lock held */
-static void __msi_pirq_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
+static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
{
irq_desc_t *desc;
return;
desc->status &= ~IRQ_INPROGRESS;
- desc_guest_eoi(d, desc, pirq);
+ desc_guest_eoi(desc, pirq);
}
}
if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
dest_mode) )
{
- __msi_pirq_eoi(d, pirq_dpci);
+ __msi_pirq_eoi(pirq_dpci);
return 1;
}
}
if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
- __msi_pirq_eoi(d, pirq_dpci);
+ __msi_pirq_eoi(pirq_dpci);
}
#endif
}
return;
stop_timer(&pirq_dpci->timer);
- pirq_guest_eoi(d, pirq);
+ pirq_guest_eoi(pirq);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
if ( --pirq_dpci->pending == 0 )
{
stop_timer(&pirq_dpci->timer);
- pirq_guest_eoi(d, dpci_pirq(pirq_dpci));
+ pirq_guest_eoi(dpci_pirq(pirq_dpci));
}
}
}
#define pirq_cleanup_check(pirq, d) \
((pirq)->evtchn ? pirq_cleanup_check(pirq, d) : (void)0)
-extern void pirq_guest_eoi(struct domain *, struct pirq *);
-extern void desc_guest_eoi(struct domain *, struct irq_desc *, struct pirq *);
+extern void pirq_guest_eoi(struct pirq *);
+extern void desc_guest_eoi(struct irq_desc *, struct pirq *);
extern int pirq_guest_unmask(struct domain *d);
extern int pirq_guest_bind(struct vcpu *, struct pirq *, int will_share);
extern void pirq_guest_unbind(struct domain *d, struct pirq *);
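
Illustrative sketch only, not part of the patch: what the prototype change above means for a caller that starts out with a domain and a pirq number. The example_eoi() wrapper is hypothetical, and pirq_info() is assumed to be the existing per-domain lookup that maps a pirq number to its struct pirq; the domain is still needed for that lookup, it just no longer has to be threaded through to the EOI itself.

/* Hypothetical caller, shown only to illustrate the new calling convention. */
#include <xen/sched.h>   /* struct domain (assumed header location) */
#include <xen/irq.h>     /* struct pirq, pirq_guest_eoi(), pirq_info() (assumed) */

static void example_eoi(struct domain *d, int pirq_nr)
{
    /* The domain is still used to find the struct pirq... */
    struct pirq *pirq = pirq_info(d, pirq_nr);

    if ( !pirq )
        return;

    /* ...but is no longer passed down for the EOI itself. */
    pirq_guest_eoi(pirq);        /* previously: pirq_guest_eoi(d, pirq) */
}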